The goals / steps of this project are the following:
show how to calibrate the camera
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
%matplotlib inline
def get_chessboard(glob_dir='camera_cal/calibration*.jpg'):
    """
    Detect 9x6 chessboard corners in every calibration image.

    Parameters
    ----------
    glob_dir : str
        Glob pattern matching the calibration images.

    Returns
    -------
    objpoints : list of np.ndarray
        3-D points in real-world space (one copy of the grid per image).
    imgpoints : list of np.ndarray
        2-D corner positions detected in the image plane.
    chess_imgs : list of np.ndarray
        Images in which a board was found, with the corners drawn.
    """
    # Prepare one set of object points for the 9x6 inner-corner grid:
    # (0,0,0), (1,0,0), (2,0,0) ... (8,5,0).  (The original comment said
    # (6,5,0), which did not match the 9x6 grid actually used.)
    objp = np.zeros((6 * 9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []   # 3-D points in real-world space
    imgpoints = []   # 2-D points in the image plane
    chess_imgs = []  # images in which a chessboard was found
    # Step through the calibration images and search for chessboard corners.
    for fname in glob.glob(glob_dir):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        if ret:  # idiomatic truth test instead of `ret == True`
            objpoints.append(objp)
            imgpoints.append(corners)
            # Draw the detected corners for visual inspection.
            img = cv2.drawChessboardCorners(img, (9, 6), corners, ret)
            chess_imgs.append(img)
        else:
            # Detection can fail on partly-cropped boards; report which file.
            print("ret==false: ", fname)
    return objpoints, imgpoints, chess_imgs
def show_chess(imgs):
    """
    Display the given images in a grid with 3 columns.

    Parameters
    ----------
    imgs : list of np.ndarray
        Images to show (axes are hidden).
    """
    # 3 images per row; +1 row so any partial last row still fits.
    row = len(imgs) // 3 + 1
    # NOTE: the original had an IPython `%matplotlib inline` magic on this
    # line; magics are not valid Python inside a function in a .py module
    # and only need to run once per notebook session, so it was removed.
    fig, axs = plt.subplots(row, 3, figsize=(35, 35))
    axis = axs.ravel()
    print("The chessboards: ")
    for i, img in enumerate(imgs):
        axis[i].axis('off')
        axis[i].imshow(img)
# Detect the chessboards and show them.  (The original pre-initialized
# objpoints/imgpoints/chess_imgs to empty lists here, but those bindings
# were immediately overwritten by the function's return values, so the
# redundant initializations were removed.)
objpoints, imgpoints, chess_imgs = get_chessboard(glob_dir='camera_cal/calibration*.jpg')
show_chess(chess_imgs)
def calibrate_camera(objpoints, imgpoints, img):
    """
    Calibrate the camera from corner correspondences and undistort `img`.

    Parameters
    ----------
    objpoints : list of np.ndarray
        3-D object points (one grid per calibration image).
    imgpoints : list of np.ndarray
        Matching 2-D corner positions detected in each image.
    img : np.ndarray
        BGR test image; also provides the image size for calibration.

    Returns
    -------
    (ret, mtx, dist, undist) : reprojection RMS error, camera matrix,
        distortion coefficients, and the undistorted test image.
    """
    # BUG FIX 1: the parameter was misspelled `imgpoint`, so the body
    # silently used the *global* `imgpoints` instead of its argument.
    # BUG FIX 2: cv2.calibrateCamera expects imageSize as (width, height);
    # img.shape[0:2] is (height, width), so the axes were swapped.
    img_size = (img.shape[1], img.shape[0])
    # Do camera calibration given object points and image points.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, img_size, None, None)
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    return ret, mtx, dist, undist
# Calibrate on one chessboard image, save the undistorted result, and
# compare the two side by side.
test_img = './camera_cal/calibration1.jpg'
undist = './output_images/undist_calibration1.jpg'

img = cv2.imread(test_img)
ret, mtx, dist, dst = calibrate_camera(objpoints, imgpoints, img)
cv2.imwrite(undist, dst)

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
for ax, picture, label in ((ax1, img, 'Original Image'),
                           (ax2, dst, 'Undistorted Image')):
    ax.imshow(picture)
    ax.set_title(label, fontsize=30)
Final calibration call: `ret, mtx, dist, dst = calibrate_camera(objpoints, imgpoints, img)`
def undistort(image, mtx = mtx, dist = dist):
    """Undistort `image` with the calibrated camera parameters.

    The defaults bind the module-level `mtx`/`dist` at definition time,
    so callers normally pass only the image.
    """
    return cv2.undistort(image, mtx, dist, None, mtx)
# Sanity-check the undistortion on a road image and save the output.
test_img = './test_images/straight_lines1.jpg'
undist = './output_images/undist_straight_lines1.jpg'
img = cv2.imread(test_img)
dst = undistort(img)
cv2.imwrite(undist, dst)

# Show original vs. undistorted (convert BGR -> RGB for matplotlib).
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
for ax, picture, label in ((ax1, img, 'Original Image'),
                           (ax2, dst, 'Undistorted Image')):
    ax.imshow(cv2.cvtColor(picture, cv2.COLOR_BGR2RGB))
    ax.set_title(label, fontsize=30)
def select_yellow(image):
    """Binary mask of yellow pixels, thresholded in HSV space (BGR input)."""
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, np.array([20, 60, 60]), np.array([38, 174, 250]))
def select_white(image):
    """Binary mask of near-white pixels, thresholded directly in BGR."""
    low, high = np.array([202, 202, 202]), np.array([255, 255, 255])
    return cv2.inRange(image, low, high)
def comb_thresh(image):
    """Combine the yellow and white masks into a single 0/1 binary image."""
    yellow_mask = select_yellow(image)
    white_mask = select_white(image)
    combined = np.zeros_like(yellow_mask)
    combined[(yellow_mask >= 1) | (white_mask >= 1)] = 1
    return combined
def color_threshold(img, s_thresh=(90, 255), r_thresh = (140, 255), u_thresh = (140, 200)):
    """
    Color-threshold a BGR image (as read by cv2).

    Combines (AND) the HLS S-channel and RGB R-channel thresholds.
    NOTE: the original computed a YUV U-channel threshold and the unused
    H/L/B/G channels as well, but the final mask never used them; that
    dead work was removed.  `u_thresh` is kept only so the signature
    stays backward-compatible.

    Returns a uint8 binary image with 1 where both thresholds pass.
    """
    # The HLS saturation channel is fairly robust to lighting changes.
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    S = hls[:, :, 2]
    s_binary = np.zeros_like(S)
    s_binary[(S > s_thresh[0]) & (S <= s_thresh[1])] = 1
    # Red channel: cv2 loads BGR, so red is index 2.
    R = img[:, :, 2]
    r_binary = np.zeros_like(R)
    r_binary[(R > r_thresh[0]) & (R <= r_thresh[1])] = 1
    # A pixel must pass both the S and the R threshold.
    combined = np.zeros_like(r_binary)
    combined[(r_binary == 1) & (s_binary == 1)] = 1
    return combined
# Run the color threshold over every test image and plot each binary
# result next to its undistorted source.
filenames = 'test_images/test*.jpg'
names = glob.glob(filenames)
color_thres = []
images = []
for filenames in names:
    origin = cv2.imread(filenames)   # load
    print(origin.shape)
    undis = undistort(origin)        # remove lens distortion
    images.append(undis)
    color_thre = comb_thresh(undis)
    color_thres.append(color_thre)

for j, binary in enumerate(color_thres):
    f, (axis1, axis2) = plt.subplots(1, 2, figsize=(15, 10))
    axis1.imshow(cv2.cvtColor(images[j], cv2.COLOR_BGR2RGB))
    axis1.set_title("undis img", fontsize = 15)
    axis2.imshow(binary, cmap='gray')
    axis2.set_title("only color threshold", fontsize = 15)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(20, 100)):
    """
    Threshold the absolute Sobel gradient of a BGR image along one axis.

    Parameters
    ----------
    img : np.ndarray
        BGR image.
    orient : {'x', 'y'}
        Gradient direction.
    sobel_kernel : int
        Unused by the cv2.Sobel calls below (they use the default
        aperture); kept for signature compatibility with the other
        threshold helpers.
    thresh : (int, int)
        Inclusive lower/upper bounds on the rescaled gradient.

    Returns a uint8 binary image (1 where the threshold passes).

    Raises
    ------
    ValueError
        For an invalid `orient`.  (The original fell through to a
        NameError on the undefined `abs_sobel` instead.)
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    elif orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
    else:
        raise ValueError("orient must be 'x' or 'y', got %r" % (orient,))
    # Rescale to 8 bit so the threshold is contrast-independent.
    scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    grad_binary = np.zeros_like(scaled_sobel)
    # Inclusive (>=, <=) thresholds.
    grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return grad_binary
def mag_thresh(image, sobel_kernel=3, mag_thresh=(30, 100)):
    """
    Threshold the gradient magnitude sqrt(gx^2 + gy^2) of a BGR image.

    BUG FIX: the body previously converted the *global* `img` instead of
    the `image` parameter, silently ignoring its argument.

    Returns a uint8 binary image (1 inside [mag_thresh[0], mag_thresh[1]]).
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Both Sobel gradients.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    # Rescale to 8 bit.
    scale_factor = np.max(gradmag) / 255
    gradmag = (gradmag / scale_factor).astype(np.uint8)
    mag_binary = np.zeros_like(gradmag)
    mag_binary[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    return mag_binary
def dir_threshold(image, sobel_kernel=3, thresh=(0.7, 1.3)):
    """
    Threshold the gradient direction arctan(|gy| / |gx|) of a BGR image.

    BUG FIX: the body previously converted the *global* `img` instead of
    the `image` parameter, silently ignoring its argument.

    Returns a float binary image (1.0 where the direction lies in `thresh`).
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Direction in [0, pi/2]; values near pi/2 are near-vertical edges.
    absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    dir_binary = np.zeros_like(absgraddir)
    dir_binary[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
    return dir_binary
def combine_threshold(image):
    """
    Gradient threshold for a BGR image.

    Currently only the x-direction Sobel threshold contributes to the
    mask.  The y/magnitude/direction variants from earlier experiments
    were still being computed but never used, so the dead (and costly)
    calls were removed; the experimental combination is kept below as a
    comment for reference.
    """
    ksize = 3
    gradx = abs_sobel_thresh(image, orient='x', sobel_kernel=ksize)
    # float64 to match the dtype the original produced via dir_threshold's
    # float output (downstream comparisons are unaffected either way).
    combined = np.zeros_like(gradx, dtype=np.float64)
    combined[(gradx == 1)] = 1
    # Earlier experiment, kept for reference:
    # grady = abs_sobel_thresh(image, orient='y', sobel_kernel=ksize)
    # mag_binary = mag_thresh(image, sobel_kernel=ksize)
    # dir_binary = dir_threshold(image, sobel_kernel=ksize)
    # combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
    return combined
# Combine color and gradient thresholds over every test image and show
# each binary mask next to its undistorted source.
filenames = 'test_images/test*.jpg'
names = glob.glob(filenames)
combines_thred = []
images = []
for filenames in names:
    origin = cv2.imread(filenames)
    undis = undistort(origin)
    images.append(undis)
    color_thre = color_threshold(undis)
    graident_thre = combine_threshold(undis)
    combined = np.zeros_like(color_thre)
    combined[(color_thre == 1) | (graident_thre == 1)] = 1
    combines_thred.append(combined)

for j, binary in enumerate(combines_thred):
    f, (axis1, axis2) = plt.subplots(1, 2, figsize=(15, 10))
    axis1.imshow(cv2.cvtColor(images[j], cv2.COLOR_BGR2RGB))
    axis1.set_title("undis img", fontsize = 15)
    axis2.imshow(binary, cmap='gray')
    axis2.set_title("combine color and gradient threshold", fontsize = 15)
Apply a perspective transform to rectify the binary image ("bird's-eye view").
def perspective_birds(img):
    """
    Warp `img` to a bird's-eye (top-down) view.

    The source quadrilateral is a hand-tuned trapezoid around the lane in
    a 1280x720 road image; it maps onto the full output frame.  (The
    unused `offset` local from the original was removed.)

    Returns (warped_image, perspective_matrix).
    """
    image_size = (img.shape[1], img.shape[0])
    # Hand-picked trapezoid in the source image...
    src = np.float32([[545, 460],
                      [735, 460],
                      [1280, 700],
                      [0, 700]])
    # ...stretched to the full 1280x720 destination frame.
    dst = np.float32([[0, 0],
                      [1280, 0],
                      [1280, 720],
                      [0, 720]])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, M, image_size)
    return warped, M
# Warp each undistorted test image to the bird's-eye view and display
# the original next to the warped result.
filenames = 'test_images/test*.jpg'
names = glob.glob(filenames)
origins = []
brids_show = []
for filenames in names:
    origin = cv2.imread(filenames)
    origins.append(origin)
    wraped, M = perspective_birds(undistort(origin))
    brids_show.append(wraped)

for j, origin in enumerate(origins):
    f, (axis1, axis2) = plt.subplots(1, 2, figsize=(15, 10))
    axis1.imshow(cv2.cvtColor(origin, cv2.COLOR_BGR2RGB))
    axis1.set_title("undis img", fontsize = 15)
    axis2.imshow(cv2.cvtColor(brids_show[j], cv2.COLOR_BGR2RGB))
    axis2.set_title("undistorted brid's view", fontsize = 15)
# Build the warped binary images used by the lane-finding stage:
# color-threshold each undistorted image, then warp the binary mask to
# the bird's-eye view.
filenames = 'test_images/test*.jpg'
names = glob.glob(filenames)
binary_combined = []
pers_images = []
for filenames in names:
    origin = cv2.imread(filenames)
    undis = undistort(origin)
    origin_wraped, origin_M = perspective_birds(undis)
    pers_images.append(origin_wraped)
    color_thre = comb_thresh(undis)
    # NOTE: the original also computed combine_threshold(undis) here but
    # never used it in `combined`, so the dead call was removed.
    combined = np.zeros_like(color_thre)
    combined[color_thre == 1] = 1
    wrapedd, M2 = perspective_birds(combined)
    binary_combined.append(wrapedd)

# Show each warped binary next to the warped color image.
j = 0
for binary in binary_combined:
    f, (axis1, axis2) = plt.subplots(1, 2, figsize=(15, 10))
    axis1.imshow(cv2.cvtColor(pers_images[j], cv2.COLOR_BGR2RGB))
    axis1.set_title("undis img", fontsize = 15)
    axis2.imshow(binary, cmap='gray')
    axis2.set_title("combine color & gradient threshold & brid's view", fontsize = 15)
    j = j + 1
Implement sliding windows and fit a polynomial.
def find_line(binary_warped, draw_windows = False):
    """
    Locate left/right lane-line pixels in a warped binary image with a
    sliding-window search and fit a 2nd-order polynomial to each line.

    Parameters
    ----------
    binary_warped : np.ndarray
        Bird's-eye-view binary image; nonzero pixels are lane candidates.
    draw_windows : bool
        If True, draw the search windows on the visualization image.

    Returns
    -------
    left_fit, right_fit : np.ndarray
        Coefficients of x = a*y^2 + b*y + c for each line.
    out_img : np.ndarray
        3-channel visualization image.
    origin_left_line, origin_right_line : (ys, xs)
        Pixel coordinates assigned to each line.
    """
    # BUG FIX: the histogram was computed from the *global* `binary`
    # (a leftover from a plotting loop) instead of `binary_warped`.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    # Histogram peak on each half gives each line's starting x.
    # (np.int was removed in NumPy 1.24 -> use the builtin int.)
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    nwindows = 9                                        # number of sliding windows
    window_height = int(binary_warped.shape[0] / nwindows)
    # Index every nonzero pixel once up front.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    margin = 100                                        # window half-width
    minpix = 50                                         # min pixels to recenter a window
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows from the image bottom upward.
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        if draw_windows:
            cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                          (win_xleft_high, win_y_high), (0, 255, 0), 2)
            cv2.rectangle(out_img, (win_xright_low, win_y_low),
                          (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Nonzero pixels falling inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recenter the next window on the mean x when enough pixels hit.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract the pixel positions assigned to each line.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    origin_left_line = (lefty, leftx)
    origin_right_line = (righty, rightx)
    # Lanes are near-vertical, so fit x as a function of y.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    return left_fit, right_fit, out_img, origin_left_line, origin_right_line
def draw_polygon(out_img,left_fitx, right_fitx, ploty, margin=100):
    """Return an overlay with a +/-margin green band filled around each fitted line."""
    window_img = np.zeros_like(out_img)
    # For each line, build a closed polygon: down the inner edge, then
    # back up the outer edge (flipped so the vertices stay in order).
    for fitx in (left_fitx, right_fitx):
        inner = np.array([np.transpose(np.vstack([fitx - margin, ploty]))])
        outer = np.array([np.flipud(np.transpose(np.vstack([fitx + margin, ploty])))])
        band_pts = np.hstack((inner, outer))
        cv2.fillPoly(window_img, np.int_([band_pts]), (0,255, 0))
    return window_img
# For every warped binary image: run the sliding-window search, fit the
# two polynomials, and visualize the lane pixels plus the search band.
final_lane_boundary = []
for i, binary_warped in enumerate(binary_combined):
    origin_wraped = brids_show[i]
    left_fit, right_fit, out_img, origin_left_line, origin_right_line = find_line(binary_warped)
    # One x sample per image row.
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]
    window_img = draw_polygon(out_img, left_fitx, right_fitx, ploty)
    # Color the detected pixels: left lane red, right lane blue.
    out_img[origin_left_line[0], origin_left_line[1]] = [255, 0, 0]
    out_img[origin_right_line[0], origin_right_line[1]] = [0, 0, 255]
    final_show = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    final_lane_boundary.append(final_show)
    f, (axis1, axis2) = plt.subplots(1, 2, figsize=(15, 10))
    axis1.imshow(cv2.cvtColor(origin_wraped, cv2.COLOR_BGR2RGB))
    axis1.set_title("undistorted brid's view", fontsize = 15)
    axis2.imshow(final_show)
    axis2.set_title("lane line", fontsize = 15)
    axis2.plot(left_fitx, ploty, color='yellow')
    axis2.plot(right_fitx, ploty, color='yellow')
def get_curve_position(left_fit, right_fit, ploty):
    """
    Pixel-space curvature and lane-center offset from the two fits.

    Returns (curver, dis2center): the mean radius of curvature of the two
    lines evaluated at the largest y in `ploty`, and the signed distance
    of the image-center x (640) from the lane midpoint at the image
    bottom (y = 720) — both in pixels.
    """
    y_eval = np.max(ploty)
    # Radius of curvature of x = a*y^2 + b*y + c at y_eval:
    #   R = (1 + (2a*y + b)^2)^1.5 / |2a|
    radii = [((1 + (2 * fit[0] * y_eval + fit[1])**2)**1.5) / np.absolute(2 * fit[0])
             for fit in (left_fit, right_fit)]
    curver = (radii[0] + radii[1]) / 2
    # Lane midpoint at the bottom row (y = 720).
    right_x = right_fit[0] * 720**2 + right_fit[1] * 720 + right_fit[2]
    left_x = left_fit[0] * 720**2 + left_fit[1] * 720 + left_fit[2]
    position = abs(right_x + left_x) / 2
    # 640 is the camera-center x assumed for the vehicle.
    dis2center = 640 - position
    return curver, dis2center
def get_curve2_positon(left_fit, right_fit, ploty):
    """
    Curvature (in meters) and center offset computed from *synthetic*
    lane data generated around the given fits.

    NOTE(review): this follows the Udacity lesson example — it perturbs
    each line's base x with unseeded np.random.randint(+/-50 px), refits,
    and converts to world units, so results are non-deterministic.  The
    function name is misspelled (`positon`) and `right_yo` is presumably
    a typo for `right_y0`; both are left unchanged to avoid breaking any
    callers.
    """
    y_eval = np.max(ploty)
    quadratic_coeff = 3e-4  # arbitrary quadratic coefficient
    # For each y position generate a random x within +/-50 px of the
    # line's base position, evaluated at y = 720 (the image bottom).
    left_y0 = left_fit[0]*720**2 + left_fit[1]*720 + left_fit[2]
    right_yo = right_fit[0]*720**2 + right_fit[1]*720 + right_fit[2]
    leftx = np.array([left_y0 + (y**2)*quadratic_coeff + np.random.randint(-50, high=51)
                      for y in ploty])
    rightx = np.array([right_yo + (y**2)*quadratic_coeff + np.random.randint(-50, high=51)
                       for y in ploty])
    leftx = leftx[::-1]    # Reverse to match top-to-bottom in y
    rightx = rightx[::-1]  # Reverse to match top-to-bottom in y
    # Fit a second-order polynomial to the synthetic pixel positions.
    # (Note: this rebinds `left_fit`/`right_fit`, shadowing the parameters
    # from here on.)
    left_fit = np.polyfit(ploty, leftx, 2)
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fit = np.polyfit(ploty, rightx, 2)
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    ym_per_pix = 30/720   # meters per pixel in y dimension
    xm_per_pix = 3.7/700  # meters per pixel in x dimension
    # Fit new polynomials to x,y in world space (meters).
    left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)
    # Radii of curvature in meters, evaluated at the bottom of the image.
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    curver = (left_curverad + right_curverad)/2
    # Vehicle position: lane midpoint at y=720 vs. the assumed camera
    # center x=640.
    # NOTE(review): the sign convention here (position - 640) is the
    # opposite of get_curve_position (640 - position) — confirm which is
    # intended.
    right_x = right_fit[0]*720**2 + right_fit[1]*720 + right_fit[2]
    left_x = left_fit[0]*720**2 + left_fit[1]*720 + left_fit[2]
    position = abs((right_x + left_x))/2
    dis2center = position - 640
    return curver, dis2center
def perspective_inv(img):
    """
    Inverse of `perspective_birds`: warp a bird's-eye image back to the
    original camera perspective.  (The unused `offset` local from the
    original was removed.)
    """
    image_size = (img.shape[1], img.shape[0])
    # Same hand-picked point pairs as perspective_birds...
    src = np.float32([[545, 460],
                      [735, 460],
                      [1280, 700],
                      [0, 700]])
    dst = np.float32([[0, 0],
                      [1280, 0],
                      [1280, 720],
                      [0, 720]])
    # ...but with the argument order swapped (dst -> src) to invert.
    M = cv2.getPerspectiveTransform(dst, src)
    return cv2.warpPerspective(img, M, image_size)
# Project the detected lane back onto each original image and annotate
# with the curvature and vehicle-position estimates.
final_lane_boundary = []
for i, binary_warped in enumerate(binary_combined):
    origin = origins[i]
    left_fit, right_fit, out_img, origin_left_line, origin_right_line = find_line(binary_warped)
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]
    curver, dis2center = get_curve_position(left_fit, right_fit, ploty)
    # Draw the lane polygon in warped space, then unwarp it onto the
    # original perspective.
    warp = np.zeros_like(out_img).astype(np.uint8)
    pts_left = np.array([np.flipud(np.transpose(np.vstack([left_fitx, ploty])))])
    pts_right = np.array([np.transpose(np.vstack([right_fitx, ploty]))])
    pts = np.hstack((pts_left, pts_right))
    cv2.polylines(warp, np.int_([pts]), isClosed=False, color=(0, 0, 255), thickness = 40)
    cv2.fillPoly(warp, np.int_([pts]), (0, 255, 0))
    unwraped = perspective_inv(warp)
    final_show = cv2.addWeighted(origin, 1, unwraped, 0.3, 0)
    f, axis1 = plt.subplots(1, 1, figsize=(15, 10))
    axis1.imshow(cv2.cvtColor(final_show, cv2.COLOR_BGR2RGB))
    axis1.set_title(" lane on original image.", fontsize = 15)
    axis1.text(100, 20, 'Vehicle position(right to center line): {:.2f}m'.format(dis2center*3.7/700),
               color='red', fontsize=15)
    axis1.text(100, 100, 'Curvature of line: {:.2f}m'.format(curver),
               color='red', fontsize=15)
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# State shared with process_image(): the most recent polynomial fits and
# a flag marking whether the full sliding-window search still needs to run.
# (The original also had a module-level `global Left_fit, Right_fit,
# is_first` statement; `global` is only meaningful inside a function, so
# the no-op statement was removed.)
Left_fit = []
Right_fit = []
is_first = True
def find_line_video(binary_warped):
    """
    Fast per-frame lane refit: search only within +/-margin of the
    previous frame's fits (module globals Left_fit / Right_fit) instead
    of redoing the full sliding-window scan.

    Returns (left_fit, right_fit): the new polynomial coefficients of
    x = a*y^2 + b*y + c for each line.
    """
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100  # search half-width around the previous fit
    global Left_fit, Right_fit
    # Boolean masks selecting pixels inside each band around the old fits.
    left_lane_inds = ((nonzerox > (Left_fit[0]*(nonzeroy**2) + Left_fit[1]*nonzeroy +
                                   Left_fit[2] - margin)) &
                      (nonzerox < (Left_fit[0]*(nonzeroy**2) +
                                   Left_fit[1]*nonzeroy + Left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (Right_fit[0]*(nonzeroy**2) + Right_fit[1]*nonzeroy +
                                    Right_fit[2] - margin)) &
                       (nonzerox < (Right_fit[0]*(nonzeroy**2) +
                                    Right_fit[1]*nonzeroy + Right_fit[2] + margin)))
    # Extract the selected pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Refit a second-order polynomial to each line.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # (The ploty/left_fitx/right_fitx plotting arrays the original
    # computed here were unused and have been removed.)
    return left_fit, right_fit
def process_image(img):
    """
    Full lane-detection pipeline for a single video frame.

    input: img in RGB (as delivered by moviepy);
    return the img with lane boundaries and numerical estimation of lane
    curvature and vehicle position.

    NOTE(review): the thresholding helpers were written for BGR images
    read by cv2, while moviepy supplies RGB frames, so the R/B channels
    are swapped relative to how those thresholds were tuned — confirm
    this is intended.

    Uses/updates module globals Left_fit, Right_fit, is_first: the first
    frame runs the full sliding-window search (find_line); subsequent
    frames only refit around the previous result (find_line_video).
    """
    # Undistort the frame with the globally calibrated camera.
    undis = undistort(img)
    # Color + gradient thresholding, OR-combined into one binary mask.
    color_thre = color_threshold(undis)
    graident_thre = combine_threshold(undis)
    combined = np.zeros_like(color_thre)
    combined[((color_thre == 1) | (graident_thre == 1))] = 1
    # Perspective transform to the bird's-eye view.
    binary_warped, M = perspective_birds(combined)
    # plt.imshow(binary_warped, cmap = 'gray')
    # Find the lane lines (full search only on the first frame).
    global Left_fit, Right_fit, is_first
    if is_first:
        Left_fit, Right_fit, out_img, origin_left_line, origin_right_line = find_line(binary_warped)
        is_first = False
    else:
        Left_fit, Right_fit = find_line_video(binary_warped)
    # One x sample per image row, for evaluating the fits.
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    # Curvature of the lane lines and the vehicle position (pixel units).
    curver, dis2center = get_curve_position(Left_fit, Right_fit, ploty)
    left_fitx = Left_fit[0]*ploty**2 + Left_fit[1]*ploty + Left_fit[2]      # x of left line per row
    right_fitx = Right_fit[0]*ploty**2 + Right_fit[1]*ploty + Right_fit[2]  # x of right line per row
    # Draw the lane polygon in warped space...
    warp = np.zeros_like(img).astype(np.uint8)
    pts_left = np.array([np.flipud(np.transpose(np.vstack([left_fitx, ploty])))])
    pts_right = np.array([np.transpose(np.vstack([right_fitx, ploty]))])
    pts = np.hstack((pts_left, pts_right))
    cv2.polylines(warp, np.int_([pts]), isClosed=False, color=(0, 0, 255), thickness = 40)
    cv2.fillPoly(warp, np.int_([pts]), (0, 255, 0))
    # ...then warp it back onto the undistorted frame and blend.
    unwraped = perspective_inv(warp)
    final_show = cv2.addWeighted(undis, 1, unwraped, 0.3, 0)
    # Annotate; dis2center is converted from pixels to meters
    # (3.7 m of lane width per ~700 px).
    cv2.putText(final_show, 'Vehicle position(right to center line): {:.2f}m'.format(dis2center/700*3.7),
                (100,80),fontFace = 16, fontScale = 1, color=(255,255,255), thickness = 2)
    cv2.putText(final_show, 'Curvature of line: {:.2f}m'.format(curver),
                (100,140),fontFace = 16, fontScale = 1, color=(255,255,255), thickness = 2)
    return final_show
# Run the pipeline over the project video and write the annotated result.
white_output = 'project_videoline.mp4'
clip1 = VideoFileClip("project_video.mp4")
white_clip = clip1.fl_image(lambda x: process_image(x))
# The `%time` IPython magic on this call is not valid in a plain .py
# module, so the method is called directly instead.
white_clip.write_videofile(white_output, audio=False)